import numpy as np
import pandas as pd
import keras
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error
import json
# Load the hourly climate data and take a first look at its shape and types.
climate_df = pd.read_csv("C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\climate_hour.csv", delimiter=",", index_col=None, header=0)
print(climate_df.shape)
print(climate_df.dtypes)
print(climate_df.head())
# Parse the timestamp column (source format is day.month.year hour:min:sec)
# and give every column a short snake_case name.
climate_df['Date Time'] = pd.to_datetime(climate_df['Date Time'], format='%d.%m.%Y %H:%M:%S')
climate_df.columns = ['date_time', 'atm_pressure', 'temperature', 'pot_temp_K', 'dew_temp_C', 'relative_humidity',
'sat_water_vap_pressure', 'act_water_vap_pressure', 'water_vap_pressure_deficit',
'specific_humidity', 'water_vap_concentration', 'air_density', 'wind_velocity',
'max_wind_velocity', 'wind_dir']
print(climate_df.dtypes)
print(climate_df.head())
# Data-quality checks: total missing values and duplicated index labels.
climate_df.isnull().sum().sum()
dup_ndx = climate_df.index.duplicated(keep = False)
sum(dup_ndx)
# Quick visual sanity check: temperature over the entire record.
plt.figure(figsize=(16, 6))
plt.plot(climate_df['date_time'], climate_df['temperature'])
plt.xlabel('Date and Time')
plt.ylabel('Temperature (C)')
plt.title('Temperature (C) Over Time')
# Use the timestamp as the row index from here on.
climate_df.set_index(['date_time'], inplace = True)
print(climate_df.head())
# Visual scan: each variable in its own stacked subplot, then the pairwise
# Pearson correlations between all columns.
# NOTE(review): `groups` starts at 1, so column 0 (atm_pressure after the
# set_index above) is never plotted -- confirm this is intentional.
values = climate_df.values
groups = range(1, climate_df.shape[1])
plt.figure(figsize=(12, 12))
for subplot_pos, group in enumerate(groups, start=1):
    plt.subplot(len(groups), 1, subplot_pos)
    plt.plot(values[:, group])
    plt.title(climate_df.columns[group], y = 0.5, x = 1.01, loc = 'left')
plt.show()
climate_corr = climate_df.corr(method = "pearson")
print(climate_corr)
# Keep the timestamp index around as a plain Series for later boolean masking.
date_time = pd.Series(climate_df.index.values)
date_time.head()
var_names = list(climate_df.columns)
print(var_names)
# NOTE(review): the two lines below repeat the date_time extraction above.
date_time = pd.Series(climate_df.index.values)
date_time.head()
# Min-max scale every column to [0, 1]; the fitted scaler is kept so model
# outputs can be inverse-transformed back to real units later.
min_max_scaler = MinMaxScaler(feature_range=(0, 1)).fit(climate_df)
climate_norm_np = min_max_scaler.transform(climate_df)
climate_norm_df = pd.DataFrame(climate_norm_np, columns = var_names, index = date_time)
climate_norm_df.head()
print(climate_norm_df.min().min())
print(climate_norm_df.max().max())
print(np.shape(climate_norm_np))
print(climate_df.shape)
# Chronological split: everything from 2015-01-01 on is the test set,
# everything earlier is the training set.
datetime_cutoff = datetime.strptime('2015-01-01 00:00:00', '%Y-%m-%d %H:%M:%S')
test_pts = date_time >= datetime_cutoff
test_ndx = np.where(test_pts == True)
train_ndx = np.where(test_pts == False)
train_norm_df = climate_norm_df.iloc[train_ndx]
test_norm_df = climate_norm_df.iloc[test_ndx]
train_norm_df.tail()
test_norm_df.head()
def create_RNN_input(in_df, timestep, dropnan, output_var):
    """Turn a feature DataFrame into lagged RNN predictors and an aligned target.

    For each row, the predictors are all columns at lags t-timestep .. t-1
    (named '<col>(t-<lag>)'), and the target is `output_var` at time t: the
    target series is shifted by -timestep and then re-indexed onto the
    predictor rows, so each predictor window pairs with the value that
    immediately follows it.

    Parameters:
        in_df      -- DataFrame of (already scaled) feature columns.
        timestep   -- number of lagged steps per window.
        dropnan    -- if True, drop the NaN rows the shifting introduces.
        output_var -- name of the column to predict.

    Returns:
        (x_df, y_df) -- predictor DataFrame and target Series with equal
        length and matching indices.
    """
    feature_names = list(in_df.columns.values)
    num_features = len(feature_names)
    lagged_frames = []
    lagged_names = []
    # Lagged input sequence (t-n, ..., t-1), most distant lag first.
    for lag in range(timestep, 0, -1):
        lagged_frames.append(in_df.shift(lag))
        lagged_names.extend('%s(t-%d)' % (name, lag) for name in feature_names)
    x_df = pd.concat(lagged_frames, axis = 1)
    x_df.columns = lagged_names
    # Target: output_var shifted so position k holds the value at time k+timestep.
    y_df = in_df[output_var].shift(-timestep)
    if dropnan:
        # Shifting leaves NaNs at the head of x and the tail of y; drop both.
        x_df.dropna(inplace = True)
        y_df.dropna(inplace = True)
    # Re-index the target onto the predictor rows so they align one-to-one.
    y_df.index = list(x_df.index.values)
    return x_df, y_df
# Build the training predictors/target with a 24-hour lookback window.
train_x_df, train_y_df = create_RNN_input(train_norm_df, 24, True, 'temperature')
print(train_x_df.shape)
print(train_y_df.shape)
train_x_df.head(3)
train_y_df.head(3)
train_norm_df.iloc[0:3]
train_norm_df.iloc[23:26]
# Persist the intermediate frames (note: index=False drops the timestamps).
train_df_file = 'C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\train_norm_df.csv'
train_norm_df.to_csv(train_df_file, header = True, index = False)
train_x_df_file = 'C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\train_x_df.csv'
train_x_df.to_csv(train_x_df_file, header = True, index = False)
train_y_df_file = 'C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\train_y_df.csv'
train_y_df.to_csv(train_y_df_file, header = True, index = False)
# Same 24-hour windowing for the test period, persisted alongside the train files.
test_x_df, test_y_df = create_RNN_input(test_norm_df, 24, True, 'temperature')
test_df_file = 'C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\test_norm_df.csv'
test_norm_df.to_csv(test_df_file, header = True, index = False)
test_x_df_file = 'C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\test_x_df.csv'
test_x_df.to_csv(test_x_df_file, header = True, index = False)
test_y_df_file = 'C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\test_y_df.csv'
test_y_df.to_csv(test_y_df_file, header = True, index = False)
# Reshape the flat (samples, 24*features) lag matrices into the 3-D
# (samples, timesteps, features) layout the LSTM layers expect.
train_x_np = np.array(train_x_df)
train_x_np = train_x_np.reshape((train_x_df.shape[0], 24, train_norm_df.shape[1]))
print(train_x_df.shape)
print(train_x_np.shape)
test_x_np = np.array(test_x_df)
test_x_np = test_x_np.reshape((test_x_df.shape[0], 24, test_norm_df.shape[1]))
print(test_x_df.shape)
print(test_x_np.shape)
train_y_np = np.array(train_y_df)
test_y_np = np.array(test_y_df)
# "Kaggle" prediction slice: start one day before the test cutoff so the
# first 24-hour window has a full lead-in, then apply the same windowing.
datetime_pred_cutoff = datetime.strptime('2014-12-31 00:00:00', '%Y-%m-%d %H:%M:%S')
pred_test_pts = date_time >= datetime_pred_cutoff
pred_test_ndx = np.where(pred_test_pts == True)
pred_test_norm_df = climate_norm_df.iloc[pred_test_ndx]
pred_test_x_df, pred_test_y_df = create_RNN_input(pred_test_norm_df, 24, True, 'temperature')
# Persist the frames (index=False drops the timestamps).
pred_test_df_file = 'C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\pred_test_norm_df.csv'
pred_test_norm_df.to_csv(pred_test_df_file, header = True, index = False)
pred_test_x_df_file = 'C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\pred_test_x_df.csv'
pred_test_x_df.to_csv(pred_test_x_df_file, header = True, index = False)
pred_test_y_df_file = 'C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\pred_test_y_df.csv'
pred_test_y_df.to_csv(pred_test_y_df_file, header = True, index = False)
# Reshape to (samples, timesteps, features) like the train/test arrays.
pred_test_x_np = np.array(pred_test_x_df)
pred_test_x_np = pred_test_x_np.reshape((pred_test_x_df.shape[0], 24, pred_test_norm_df.shape[1]))
print(pred_test_x_df.shape)
print(pred_test_x_np.shape)
pred_test_y_np = np.array(pred_test_y_df)
def plot_model_losses(model_hist_dict, model_desc):
    """Plot training (blue dots) vs. validation (blue line) loss per epoch.

    Parameters:
        model_hist_dict -- Keras History.history dict with 'loss' and 'val_loss'.
        model_desc      -- label used in the plot title.
    """
    train_losses = model_hist_dict['loss']
    valid_losses = model_hist_dict['val_loss']
    epoch_axis = range(1, len(train_losses) + 1)
    # 'bo' = blue dots for training, 'b' = solid blue line for validation.
    plt.plot(epoch_axis, train_losses, 'bo', label = 'Training Loss')
    plt.plot(epoch_axis, valid_losses, 'b', label = 'Validation Loss')
    plt.title(model_desc + ' Training and Validation Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()
def evaluate_model(model, model_hist_dict, x_test_np, y_test_np, pred_x_test_np, pred_y_test_np, model_batch,
                   model_num):
    """Score a fitted model on the test and Kaggle sets and plot its loss curves.

    Returns:
        (score, pred_score) -- evaluate() losses on the test set and on the
        Kaggle prediction set, in that order.
    """
    eval_sets = [('Model %d Test Loss Score: ', 'Model %d', x_test_np, y_test_np),
                 ('Model %d Kaggle Test Loss Score: ', 'Model %d Kaggle', pred_x_test_np, pred_y_test_np)]
    results = []
    for score_fmt, desc_fmt, x_np, y_np in eval_sets:
        loss = model.evaluate(x_np, y_np, batch_size = model_batch)
        print(score_fmt % model_num, loss)
        # Same history dict both times; only the plot title differs.
        plot_model_losses(model_hist_dict, desc_fmt % model_num)
        results.append(loss)
    return results[0], results[1]
def _invert_temperature(test_x_norm, temperature_col, date_time_x):
    """Replace the normalized temperature column with `temperature_col`,
    inverse-transform the whole frame, and return the real-unit temperatures."""
    # .copy() (not the original `[:]` slice) so the assignment below cannot
    # leak into the shared slice or raise SettingWithCopyWarning.
    frame = test_x_norm.copy()
    frame['temperature'] = temperature_col
    # min_max_scaler / var_names are module-level globals fitted above.
    inverted = min_max_scaler.inverse_transform(frame)
    inverted_df = pd.DataFrame(inverted, columns = var_names, index = date_time_x)
    return inverted_df['temperature']
def create_model_predictions(model, x_test_np, norm_test_df, y_test_np, timestep=24):
    """Predict with `model` and de-normalize both predictions and actuals.

    Parameters:
        model        -- fitted Keras model.
        x_test_np    -- 3-D predictor array fed to model.predict.
        norm_test_df -- normalized frame the predictors were windowed from;
                        its rows after the first `timestep` align with the
                        model outputs.
        y_test_np    -- normalized target values aligned with x_test_np.
        timestep     -- lookback window length (default 24, matching the
                        windowing above; kept as a parameter for reuse).

    Returns:
        (inv_yhat, inv_y, date_time_x, rmse, mae) -- de-normalized predicted
        and actual temperature Series, their timestamps, and the real-unit
        RMSE / MAE between them.
    """
    model_predictions = model.predict(x_test_np)
    # Rows aligned with the model outputs: everything after the lead-in window.
    test_x_norm = norm_test_df.iloc[timestep:, :]
    date_time_x = pd.Series(test_x_norm.index.values)
    # Invert scaling for the forecast and for the actual values.
    inv_yhat = _invert_temperature(test_x_norm, model_predictions, date_time_x)
    inv_y = _invert_temperature(test_x_norm, y_test_np, date_time_x)
    # Error metrics in real temperature units (degrees C).
    rmse = np.sqrt(mean_squared_error(inv_y, inv_yhat))
    mae = mean_absolute_error(inv_y, inv_yhat)
    return inv_yhat, inv_y, date_time_x, rmse, mae
def plot_predict_data(date_time_x, inv_y, inv_yhat, model_desc):
    """Overlay actual (red) and predicted (blue) de-normalized temperature.

    Parameters:
        date_time_x -- timestamps for the x axis.
        inv_y       -- actual temperatures in real units.
        inv_yhat    -- predicted temperatures in real units.
        model_desc  -- label used in the plot title.
    """
    plt.figure(figsize=(16, 6))
    plt.plot(date_time_x, inv_y, 'r', label = 'Inverse Temperature')
    plt.plot(date_time_x, inv_yhat, 'b', label = 'Inverse Predicted Temperature')
    plt.title(model_desc + ' Temperature Outputs')
    plt.xlabel('Time')
    plt.ylabel('Temperature (C)')
    plt.legend()
    plt.show()
def create_pred_output(date_time_x, inv_yhat):
    """Package timestamps and temperatures into a two-column submission frame.

    Parameters:
        date_time_x -- Series of timestamps.
        inv_yhat    -- Series of de-normalized temperatures.

    Returns:
        DataFrame with 'date_time' strings (dd.mm.YYYY_HH:MM:SS) and a
        'temperature' column.
    """
    formatted = [stamp.strftime('%d.%m.%Y_%H:%M:%S') for stamp in date_time_x]
    pred_out_df = pd.DataFrame(formatted, columns = ['date_time'])
    pred_out_df['temperature'] = inv_yhat.values
    return pred_out_df
def create_and_save_pred_data(model, x_test_np, norm_test_df, y_test_np, pred_x_test_np, pred_norm_test_df,
                              pred_y_test_np, model_num):
    """Predict, report, plot, and persist results for one fitted model.

    Runs the prediction pipeline on the held-out test set and on the Kaggle
    slice, prints RMSE/MAE for both, plots actual-vs-predicted curves, and
    writes the Kaggle predictions and actuals to per-model CSV files.

    Returns:
        (rmse, mae, pred_rmse, pred_mae) -- test-set then Kaggle-set errors.
    """
    # Held-out test set: predict, report errors, and plot.
    inv_yhat, inv_y, date_time_x, rmse, mae = create_model_predictions(model, x_test_np, norm_test_df, y_test_np)
    print('Model %d Test RMSE: %.6f' % (model_num, rmse))
    print('Model %d Test MAE: %.6f' % (model_num, mae))
    print('\n')
    plot_predict_data(date_time_x, inv_y, inv_yhat, 'Model %d' % model_num)
    pred_out_df = create_pred_output(date_time_x, inv_yhat)
    # Kaggle slice: same pipeline on the wider (2014-12-31 onward) window.
    pred_inv_yhat, pred_inv_y, pred_date_time_x, pred_rmse, pred_mae = create_model_predictions(model,
        pred_x_test_np, pred_norm_test_df, pred_y_test_np)
    print('Model %d Kaggle Prediction Test RMSE: %.6f' % (model_num, pred_rmse))
    print('Model %d Kaggle Prediction Test MAE: %.6f' % (model_num, pred_mae))
    plot_predict_data(pred_date_time_x, pred_inv_y, pred_inv_yhat, 'Model %d Kaggle Prediction' % model_num)
    kaggle_pred_out_df = create_pred_output(pred_date_time_x, pred_inv_yhat)
    kaggle_act_out_df = create_pred_output(pred_date_time_x, pred_inv_y)
    # Persist Kaggle predictions and actuals to per-model CSVs.
    out_file = 'C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\Model_' + \
        str(model_num) + '_Predictions.csv'
    kaggle_pred_out_df.to_csv(out_file, header = True, index = False)
    act_out_file = 'C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\Model_' + \
        str(model_num) + '_Actual.csv'
    kaggle_act_out_df.to_csv(act_out_file, header = True, index = False)
    return rmse, mae, pred_rmse, pred_mae
# Accumulators for per-model results; appended after each model section below.
model_nums = []
scores = []
pred_scores = []
rmse_vals = []
pred_rmse_vals = []
mae_vals = []
pred_mae_vals = []
# --- Model 1 (baseline): LSTM(50) -> Dense(1); MAE loss, Adam, batch 10, 20 epochs ---
model1 = Sequential()
model1.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2])))
model1.add(Dense(1))
model1.summary()
model1.compile(loss = 'mae', optimizer = 'adam')
batch_model1 = 10
model1_history = model1.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model1,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model1.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model1.h5')
model1_hist_dict = model1_history.history
model1_hist_json = json.dumps(model1_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model1_hist.json', 'w')
f.write(model1_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model1 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model1.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model1_hist.json', 'r')
#model1_hist_json = f.read()
#f.close()
#model1_hist_dict = json.loads(model1_hist_json)
#print(model1.summary())
#print(model1_hist_dict)
# Evaluate, save predictions, and record metrics for the comparison tables.
score1, pred_score1 = evaluate_model(model1, model1_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model1, 1)
rmse1, mae1, pred_rmse1, pred_mae1 = create_and_save_pred_data(model1, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 1)
model_nums.append(1)
scores.append(score1)
pred_scores.append(pred_score1)
rmse_vals.append(rmse1)
pred_rmse_vals.append(pred_rmse1)
mae_vals.append(mae1)
pred_mae_vals.append(pred_mae1)
# --- Model 2: wider LSTM(150) -> Dense(1); MAE loss, Adam, batch 10, 20 epochs ---
model2 = Sequential()
model2.add(LSTM(150, input_shape=(train_x_np.shape[1], train_x_np.shape[2])))
model2.add(Dense(1))
model2.summary()
model2.compile(loss = 'mae', optimizer = 'adam')
batch_model2 = 10
model2_history = model2.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model2,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model2.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model2.h5')
model2_hist_dict = model2_history.history
model2_hist_json = json.dumps(model2_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model2_hist.json', 'w')
f.write(model2_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model2 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model2.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model2_hist.json', 'r')
#model2_hist_json = f.read()
#f.close()
#model2_hist_dict = json.loads(model2_hist_json)
#print(model2.summary())
#print(model2_hist_dict)
# Evaluate, save predictions, and record metrics.
score2, pred_score2 = evaluate_model(model2, model2_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model2, 2)
rmse2, mae2, pred_rmse2, pred_mae2 = create_and_save_pred_data(model2, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 2)
model_nums.append(2)
scores.append(score2)
pred_scores.append(pred_score2)
rmse_vals.append(rmse2)
pred_rmse_vals.append(pred_rmse2)
mae_vals.append(mae2)
pred_mae_vals.append(pred_mae2)
# --- Model 3: stacked LSTM(50) -> LSTM(50) -> Dense(1); batch 10, 20 epochs ---
model3 = Sequential()
model3.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), return_sequences=True))
model3.add(LSTM(50))
model3.add(Dense(1))
model3.summary()
model3.compile(loss = 'mae', optimizer = 'adam')
batch_model3 = 10
model3_history = model3.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model3,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model3.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model3.h5')
model3_hist_dict = model3_history.history
model3_hist_json = json.dumps(model3_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model3_hist.json', 'w')
f.write(model3_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model3 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model3.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model3_hist.json', 'r')
#model3_hist_json = f.read()
#f.close()
#model3_hist_dict = json.loads(model3_hist_json)
#print(model3.summary())
#print(model3_hist_dict)
# Evaluate, save predictions, and record metrics.
score3, pred_score3 = evaluate_model(model3, model3_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model3, 3)
rmse3, mae3, pred_rmse3, pred_mae3 = create_and_save_pred_data(model3, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 3)
model_nums.append(3)
scores.append(score3)
pred_scores.append(pred_score3)
rmse_vals.append(rmse3)
pred_rmse_vals.append(pred_rmse3)
mae_vals.append(mae3)
pred_mae_vals.append(pred_mae3)
# --- Model 4: narrow LSTM(10) -> Dense(1); batch 10, 20 epochs ---
model4 = Sequential()
model4.add(LSTM(10, input_shape=(train_x_np.shape[1], train_x_np.shape[2])))
model4.add(Dense(1))
model4.summary()
model4.compile(loss = 'mae', optimizer = 'adam')
batch_model4 = 10
model4_history = model4.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model4,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model4.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model4.h5')
model4_hist_dict = model4_history.history
model4_hist_json = json.dumps(model4_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model4_hist.json', 'w')
f.write(model4_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model4 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model4.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model4_hist.json', 'r')
#model4_hist_json = f.read()
#f.close()
#model4_hist_dict = json.loads(model4_hist_json)
#print(model4.summary())
#print(model4_hist_dict)
# Evaluate, save predictions, and record metrics.
score4, pred_score4 = evaluate_model(model4, model4_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model4, 4)
rmse4, mae4, pred_rmse4, pred_mae4 = create_and_save_pred_data(model4, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 4)
model_nums.append(4)
scores.append(score4)
pred_scores.append(pred_score4)
rmse_vals.append(rmse4)
pred_rmse_vals.append(pred_rmse4)
mae_vals.append(mae4)
pred_mae_vals.append(pred_mae4)
# --- Model 5: stacked narrow LSTM(20) -> LSTM(20) -> Dense(1); batch 10, 20 epochs ---
model5 = Sequential()
model5.add(LSTM(20, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), return_sequences=True))
model5.add(LSTM(20))
model5.add(Dense(1))
model5.summary()
model5.compile(loss = 'mae', optimizer = 'adam')
batch_model5 = 10
model5_history = model5.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model5,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model5.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model5.h5')
model5_hist_dict = model5_history.history
model5_hist_json = json.dumps(model5_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model5_hist.json', 'w')
f.write(model5_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model5 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model5.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model5_hist.json', 'r')
#model5_hist_json = f.read()
#f.close()
#model5_hist_dict = json.loads(model5_hist_json)
#print(model5.summary())
#print(model5_hist_dict)
# Evaluate, save predictions, and record metrics.
score5, pred_score5 = evaluate_model(model5, model5_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model5, 5)
rmse5, mae5, pred_rmse5, pred_mae5 = create_and_save_pred_data(model5, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 5)
model_nums.append(5)
scores.append(score5)
pred_scores.append(pred_score5)
rmse_vals.append(rmse5)
pred_rmse_vals.append(pred_rmse5)
mae_vals.append(mae5)
pred_mae_vals.append(pred_mae5)
# --- Model 6: LSTM(50) -> Dense(50) -> Dense(1); batch 10, 20 epochs ---
model6 = Sequential()
model6.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2])))
model6.add(Dense(50))
model6.add(Dense(1))
model6.summary()
model6.compile(loss = 'mae', optimizer = 'adam')
batch_model6 = 10
model6_history = model6.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model6,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model6.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model6.h5')
model6_hist_dict = model6_history.history
model6_hist_json = json.dumps(model6_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model6_hist.json', 'w')
f.write(model6_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model6 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model6.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model6_hist.json', 'r')
#model6_hist_json = f.read()
#f.close()
#model6_hist_dict = json.loads(model6_hist_json)
#print(model6.summary())
#print(model6_hist_dict)
# Evaluate, save predictions, and record metrics.
score6, pred_score6 = evaluate_model(model6, model6_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model6, 6)
rmse6, mae6, pred_rmse6, pred_mae6 = create_and_save_pred_data(model6, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 6)
model_nums.append(6)
scores.append(score6)
pred_scores.append(pred_score6)
rmse_vals.append(rmse6)
pred_rmse_vals.append(pred_rmse6)
mae_vals.append(mae6)
pred_mae_vals.append(pred_mae6)
# --- Model 7: LSTM(50) -> Dense(50) -> Dropout(0.3) -> Dense(1); batch 10, 20 epochs ---
model7 = Sequential()
model7.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2])))
model7.add(Dense(50))
model7.add(Dropout(0.3))
model7.add(Dense(1))
model7.summary()
model7.compile(loss = 'mae', optimizer = 'adam')
batch_model7 = 10
model7_history = model7.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model7,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model7.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model7.h5')
model7_hist_dict = model7_history.history
model7_hist_json = json.dumps(model7_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model7_hist.json', 'w')
f.write(model7_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model7 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model7.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model7_hist.json', 'r')
#model7_hist_json = f.read()
#f.close()
#model7_hist_dict = json.loads(model7_hist_json)
#print(model7.summary())
#print(model7_hist_dict)
# Evaluate, save predictions, and record metrics.
score7, pred_score7 = evaluate_model(model7, model7_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model7, 7)
rmse7, mae7, pred_rmse7, pred_mae7 = create_and_save_pred_data(model7, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 7)
model_nums.append(7)
scores.append(score7)
pred_scores.append(pred_score7)
rmse_vals.append(rmse7)
pred_rmse_vals.append(pred_rmse7)
mae_vals.append(mae7)
pred_mae_vals.append(pred_mae7)
# --- Model 8: LSTM(50) with relu cell activation (instead of default tanh) -> Dense(1) ---
model8 = Sequential()
model8.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), activation = 'relu'))
model8.add(Dense(1))
model8.summary()
model8.compile(loss = 'mae', optimizer = 'adam')
batch_model8 = 10
model8_history = model8.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model8,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model8.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model8.h5')
model8_hist_dict = model8_history.history
model8_hist_json = json.dumps(model8_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model8_hist.json', 'w')
f.write(model8_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model8 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model8.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model8_hist.json', 'r')
#model8_hist_json = f.read()
#f.close()
#model8_hist_dict = json.loads(model8_hist_json)
#print(model8.summary())
#print(model8_hist_dict)
# Evaluate, save predictions, and record metrics.
score8, pred_score8 = evaluate_model(model8, model8_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model8, 8)
rmse8, mae8, pred_rmse8, pred_mae8 = create_and_save_pred_data(model8, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 8)
model_nums.append(8)
scores.append(score8)
pred_scores.append(pred_score8)
rmse_vals.append(rmse8)
pred_rmse_vals.append(pred_rmse8)
mae_vals.append(mae8)
pred_mae_vals.append(pred_mae8)
# --- Model 9: LSTM(50) -> Dense(1, relu); batch 10, 20 epochs ---
# NOTE(review): a relu output clamps predictions at 0 in normalized space --
# presumably intentional as an experiment; confirm.
model9 = Sequential()
model9.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2])))
model9.add(Dense(1, activation = 'relu'))
model9.summary()
model9.compile(loss = 'mae', optimizer = 'adam')
batch_model9 = 10
# BUG FIX: the original called model8.fit(...) here, which retrained model 8
# and left model9 untrained (and saved untrained) with model 8's history.
model9_history = model9.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model9,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model9.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model9.h5')
model9_hist_dict = model9_history.history
model9_hist_json = json.dumps(model9_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model9_hist.json', 'w')
f.write(model9_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model9 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model9.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model9_hist.json', 'r')
#model9_hist_json = f.read()
#f.close()
#model9_hist_dict = json.loads(model9_hist_json)
#print(model9.summary())
#print(model9_hist_dict)
# Evaluate, save predictions, and record metrics.
score9, pred_score9 = evaluate_model(model9, model9_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model9, 9)
rmse9, mae9, pred_rmse9, pred_mae9 = create_and_save_pred_data(model9, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 9)
model_nums.append(9)
scores.append(score9)
pred_scores.append(pred_score9)
rmse_vals.append(rmse9)
pred_rmse_vals.append(pred_rmse9)
mae_vals.append(mae9)
pred_mae_vals.append(pred_mae9)
# --- Model 10: same as Model 1 but with a smaller batch size (5) ---
model10 = Sequential()
model10.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2])))
model10.add(Dense(1))
model10.summary()
model10.compile(loss = 'mae', optimizer = 'adam')
batch_model10 = 5
model10_history = model10.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model10,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model10.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model10.h5')
model10_hist_dict = model10_history.history
model10_hist_json = json.dumps(model10_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model10_hist.json', 'w')
f.write(model10_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model10 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model10.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model10_hist.json', 'r')
#model10_hist_json = f.read()
#f.close()
#model10_hist_dict = json.loads(model10_hist_json)
#print(model10.summary())
#print(model10_hist_dict)
# Evaluate, save predictions, and record metrics.
score10, pred_score10 = evaluate_model(model10, model10_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model10, 10)
rmse10, mae10, pred_rmse10, pred_mae10 = create_and_save_pred_data(model10, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 10)
model_nums.append(10)
scores.append(score10)
pred_scores.append(pred_score10)
rmse_vals.append(rmse10)
pred_rmse_vals.append(pred_rmse10)
mae_vals.append(mae10)
pred_mae_vals.append(pred_mae10)
# --- Model 11: same as Model 1 but with a larger batch size (100) ---
model11 = Sequential()
model11.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2])))
model11.add(Dense(1))
model11.summary()
model11.compile(loss = 'mae', optimizer = 'adam')
batch_model11 = 100
model11_history = model11.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model11,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model11.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model11.h5')
model11_hist_dict = model11_history.history
model11_hist_json = json.dumps(model11_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model11_hist.json', 'w')
f.write(model11_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model11 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model11.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model11_hist.json', 'r')
#model11_hist_json = f.read()
#f.close()
#model11_hist_dict = json.loads(model11_hist_json)
#print(model11.summary())
#print(model11_hist_dict)
# Evaluate, save predictions, and record metrics.
score11, pred_score11 = evaluate_model(model11, model11_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model11, 11)
rmse11, mae11, pred_rmse11, pred_mae11 = create_and_save_pred_data(model11, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 11)
model_nums.append(11)
scores.append(score11)
pred_scores.append(pred_score11)
rmse_vals.append(rmse11)
pred_rmse_vals.append(pred_rmse11)
mae_vals.append(mae11)
pred_mae_vals.append(pred_mae11)
# --- Model 12: LSTM(50) with input dropout 0.2 -> Dense(1); batch 10, 20 epochs ---
model12 = Sequential()
model12.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), dropout = 0.2))
model12.add(Dense(1))
model12.summary()
model12.compile(loss = 'mae', optimizer = 'adam')
batch_model12 = 10
model12_history = model12.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model12,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model12.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model12.h5')
model12_hist_dict = model12_history.history
model12_hist_json = json.dumps(model12_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model12_hist.json', 'w')
f.write(model12_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model12 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model12.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model12_hist.json', 'r')
#model12_hist_json = f.read()
#f.close()
#model12_hist_dict = json.loads(model12_hist_json)
#print(model12.summary())
#print(model12_hist_dict)
# Evaluate, save predictions, and record metrics.
score12, pred_score12 = evaluate_model(model12, model12_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model12, 12)
rmse12, mae12, pred_rmse12, pred_mae12 = create_and_save_pred_data(model12, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 12)
model_nums.append(12)
scores.append(score12)
pred_scores.append(pred_score12)
rmse_vals.append(rmse12)
pred_rmse_vals.append(pred_rmse12)
mae_vals.append(mae12)
pred_mae_vals.append(pred_mae12)
# --- Model 13: LSTM(50) with recurrent dropout 0.2 -> Dense(1); batch 10, 20 epochs ---
model13 = Sequential()
model13.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), recurrent_dropout = 0.2))
model13.add(Dense(1))
model13.summary()
model13.compile(loss = 'mae', optimizer = 'adam')
batch_model13 = 10
model13_history = model13.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model13,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model13.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model13.h5')
model13_hist_dict = model13_history.history
model13_hist_json = json.dumps(model13_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model13_hist.json', 'w')
f.write(model13_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model13 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model13.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model13_hist.json', 'r')
#model13_hist_json = f.read()
#f.close()
#model13_hist_dict = json.loads(model13_hist_json)
#print(model13.summary())
#print(model13_hist_dict)
# Evaluate, save predictions, and record metrics.
score13, pred_score13 = evaluate_model(model13, model13_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model13, 13)
rmse13, mae13, pred_rmse13, pred_mae13 = create_and_save_pred_data(model13, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 13)
model_nums.append(13)
scores.append(score13)
pred_scores.append(pred_score13)
rmse_vals.append(rmse13)
pred_rmse_vals.append(pred_rmse13)
mae_vals.append(mae13)
pred_mae_vals.append(pred_mae13)
# --- Model 14: LSTM(50) with mild L2 kernel regularization (0.001) -> Dense(1) ---
model14 = Sequential()
model14.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), kernel_regularizer =
keras.regularizers.l2(0.001)))
model14.add(Dense(1))
model14.summary()
model14.compile(loss = 'mae', optimizer = 'adam')
batch_model14 = 10
model14_history = model14.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model14,
validation_data = (test_x_np, test_y_np))
# Persist the trained model and its training history.
model14.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model14.h5')
model14_hist_dict = model14_history.history
model14_hist_json = json.dumps(model14_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model14_hist.json', 'w')
f.write(model14_hist_json)
f.close()
# Uncomment to reload the saved model/history instead of retraining:
#model14 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model14.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model14_hist.json', 'r')
#model14_hist_json = f.read()
#f.close()
#model14_hist_dict = json.loads(model14_hist_json)
#print(model14.summary())
#print(model14_hist_dict)
# Evaluate, save predictions, and record metrics.
score14, pred_score14 = evaluate_model(model14, model14_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model14, 14)
rmse14, mae14, pred_rmse14, pred_mae14 = create_and_save_pred_data(model14, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 14)
model_nums.append(14)
scores.append(score14)
pred_scores.append(pred_score14)
rmse_vals.append(rmse14)
pred_rmse_vals.append(pred_rmse14)
mae_vals.append(mae14)
pred_mae_vals.append(pred_mae14)
model15 = Sequential()
model15.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), kernel_regularizer =
keras.regularizers.l2(0.1)))
model15.add(Dense(1))
model15.summary()
model15.compile(loss = 'mae', optimizer = 'adam')
batch_model15 = 10
model15_history = model15.fit(train_x_np, train_y_np, epochs = 20, batch_size = batch_model15,
validation_data = (test_x_np, test_y_np))
model15.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model15.h5')
model15_hist_dict = model15_history.history
model15_hist_json = json.dumps(model15_hist_dict)
f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model15_hist.json', 'w')
f.write(model15_hist_json)
f.close()
#model15 = keras.models.load_model('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model15.h5')
#f = open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model15_hist.json', 'r')
#model15_hist_json = f.read()
#f.close()
#model15_hist_dict = json.loads(model15_hist_json)
#print(model15.summary())
#print(model15_hist_dict)
score15, pred_score15 = evaluate_model(model15, model15_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
batch_model15, 15)
rmse15, mae15, pred_rmse15, pred_mae15 = create_and_save_pred_data(model15, test_x_np, test_norm_df, test_y_np,
pred_test_x_np, pred_test_norm_df, pred_test_y_np, 15)
model_nums.append(15)
scores.append(score15)
pred_scores.append(pred_score15)
rmse_vals.append(rmse15)
pred_rmse_vals.append(pred_rmse15)
mae_vals.append(mae15)
pred_mae_vals.append(pred_mae15)
# Model 16: single LSTM(50) with an L1(0.001) kernel regularizer -> Dense(1).
model16 = Sequential()
model16.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]),
                 kernel_regularizer=keras.regularizers.l1(0.001)))
model16.add(Dense(1))
model16.summary()
model16.compile(loss='mae', optimizer='adam')
batch_model16 = 10
model16_history = model16.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model16,
                              validation_data=(test_x_np, test_y_np))
model16.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model16.h5')
model16_hist_dict = model16_history.history
model16_hist_json = json.dumps(model16_hist_dict)
# 'with' closes the file even on error (the bare open/close pair did not).
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model16_hist.json', 'w') as f:
    f.write(model16_hist_json)
score16, pred_score16 = evaluate_model(model16, model16_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model16, 16)
rmse16, mae16, pred_rmse16, pred_mae16 = create_and_save_pred_data(model16, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 16)
model_nums.append(16)
scores.append(score16)
pred_scores.append(pred_score16)
rmse_vals.append(rmse16)
pred_rmse_vals.append(pred_rmse16)
mae_vals.append(mae16)
pred_mae_vals.append(pred_mae16)
# Model 17: same as model 16 but with a stronger L1(0.1) kernel penalty.
model17 = Sequential()
model17.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]),
                 kernel_regularizer=keras.regularizers.l1(0.1)))
model17.add(Dense(1))
model17.summary()
model17.compile(loss='mae', optimizer='adam')
batch_model17 = 10
model17_history = model17.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model17,
                              validation_data=(test_x_np, test_y_np))
model17.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model17.h5')
model17_hist_dict = model17_history.history
model17_hist_json = json.dumps(model17_hist_dict)
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model17_hist.json', 'w') as f:
    f.write(model17_hist_json)
score17, pred_score17 = evaluate_model(model17, model17_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model17, 17)
rmse17, mae17, pred_rmse17, pred_mae17 = create_and_save_pred_data(model17, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 17)
model_nums.append(17)
scores.append(score17)
pred_scores.append(pred_score17)
rmse_vals.append(rmse17)
pred_rmse_vals.append(pred_rmse17)
mae_vals.append(mae17)
pred_mae_vals.append(pred_mae17)
# Model 18: LSTM(50) with an L2(0.001) RECURRENT regularizer -> Dense(1).
model18 = Sequential()
model18.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]),
                 recurrent_regularizer=keras.regularizers.l2(0.001)))
model18.add(Dense(1))
model18.summary()
model18.compile(loss='mae', optimizer='adam')
batch_model18 = 10
model18_history = model18.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model18,
                              validation_data=(test_x_np, test_y_np))
model18.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model18.h5')
model18_hist_dict = model18_history.history
model18_hist_json = json.dumps(model18_hist_dict)
# 'with' closes the file even on error (the bare open/close pair did not).
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model18_hist.json', 'w') as f:
    f.write(model18_hist_json)
score18, pred_score18 = evaluate_model(model18, model18_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model18, 18)
rmse18, mae18, pred_rmse18, pred_mae18 = create_and_save_pred_data(model18, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 18)
model_nums.append(18)
scores.append(score18)
pred_scores.append(pred_score18)
rmse_vals.append(rmse18)
pred_rmse_vals.append(pred_rmse18)
mae_vals.append(mae18)
pred_mae_vals.append(pred_mae18)
# Model 19: same as model 18 but with a stronger L2(0.1) recurrent penalty.
model19 = Sequential()
model19.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]),
                 recurrent_regularizer=keras.regularizers.l2(0.1)))
model19.add(Dense(1))
model19.summary()
model19.compile(loss='mae', optimizer='adam')
batch_model19 = 10
model19_history = model19.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model19,
                              validation_data=(test_x_np, test_y_np))
model19.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model19.h5')
model19_hist_dict = model19_history.history
model19_hist_json = json.dumps(model19_hist_dict)
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model19_hist.json', 'w') as f:
    f.write(model19_hist_json)
score19, pred_score19 = evaluate_model(model19, model19_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model19, 19)
rmse19, mae19, pred_rmse19, pred_mae19 = create_and_save_pred_data(model19, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 19)
model_nums.append(19)
scores.append(score19)
pred_scores.append(pred_score19)
rmse_vals.append(rmse19)
pred_rmse_vals.append(pred_rmse19)
mae_vals.append(mae19)
pred_mae_vals.append(pred_mae19)
# Model 20: LSTM(50) with an L1(0.001) recurrent regularizer -> Dense(1).
model20 = Sequential()
model20.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]),
                 recurrent_regularizer=keras.regularizers.l1(0.001)))
model20.add(Dense(1))
model20.summary()
model20.compile(loss='mae', optimizer='adam')
batch_model20 = 10
model20_history = model20.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model20,
                              validation_data=(test_x_np, test_y_np))
model20.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model20.h5')
model20_hist_dict = model20_history.history
model20_hist_json = json.dumps(model20_hist_dict)
# 'with' closes the file even on error (the bare open/close pair did not).
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model20_hist.json', 'w') as f:
    f.write(model20_hist_json)
score20, pred_score20 = evaluate_model(model20, model20_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model20, 20)
rmse20, mae20, pred_rmse20, pred_mae20 = create_and_save_pred_data(model20, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 20)
model_nums.append(20)
scores.append(score20)
pred_scores.append(pred_score20)
rmse_vals.append(rmse20)
pred_rmse_vals.append(pred_rmse20)
mae_vals.append(mae20)
pred_mae_vals.append(pred_mae20)
# Model 21: same as model 20 but with a stronger L1(0.1) recurrent penalty.
model21 = Sequential()
model21.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]),
                 recurrent_regularizer=keras.regularizers.l1(0.1)))
model21.add(Dense(1))
model21.summary()
model21.compile(loss='mae', optimizer='adam')
batch_model21 = 10
model21_history = model21.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model21,
                              validation_data=(test_x_np, test_y_np))
model21.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model21.h5')
model21_hist_dict = model21_history.history
model21_hist_json = json.dumps(model21_hist_dict)
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model21_hist.json', 'w') as f:
    f.write(model21_hist_json)
score21, pred_score21 = evaluate_model(model21, model21_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model21, 21)
rmse21, mae21, pred_rmse21, pred_mae21 = create_and_save_pred_data(model21, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 21)
model_nums.append(21)
scores.append(score21)
pred_scores.append(pred_score21)
rmse_vals.append(rmse21)
pred_rmse_vals.append(pred_rmse21)
mae_vals.append(mae21)
pred_mae_vals.append(pred_mae21)
# Model 22: stacked LSTM(50) -> LSTM(50) -> Dense(50) -> Dense(1).
model22 = Sequential()
model22.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), return_sequences=True))
model22.add(LSTM(50, return_sequences=False))
model22.add(Dense(50))
model22.add(Dense(1))
model22.summary()
model22.compile(loss='mae', optimizer='adam')
batch_model22 = 10
model22_history = model22.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model22,
                              validation_data=(test_x_np, test_y_np))
model22.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model22.h5')
model22_hist_dict = model22_history.history
model22_hist_json = json.dumps(model22_hist_dict)
# 'with' closes the file even on error (the bare open/close pair did not).
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model22_hist.json', 'w') as f:
    f.write(model22_hist_json)
score22, pred_score22 = evaluate_model(model22, model22_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model22, 22)
rmse22, mae22, pred_rmse22, pred_mae22 = create_and_save_pred_data(model22, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 22)
model_nums.append(22)
scores.append(score22)
pred_scores.append(pred_score22)
rmse_vals.append(rmse22)
pred_rmse_vals.append(pred_rmse22)
mae_vals.append(mae22)
pred_mae_vals.append(pred_mae22)
# Model 23: same shape as model 22 but with narrower LSTM(20) layers.
model23 = Sequential()
model23.add(LSTM(20, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), return_sequences=True))
model23.add(LSTM(20, return_sequences=False))
model23.add(Dense(50))
model23.add(Dense(1))
model23.summary()
model23.compile(loss='mae', optimizer='adam')
batch_model23 = 10
model23_history = model23.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model23,
                              validation_data=(test_x_np, test_y_np))
model23.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model23.h5')
model23_hist_dict = model23_history.history
model23_hist_json = json.dumps(model23_hist_dict)
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model23_hist.json', 'w') as f:
    f.write(model23_hist_json)
score23, pred_score23 = evaluate_model(model23, model23_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model23, 23)
rmse23, mae23, pred_rmse23, pred_mae23 = create_and_save_pred_data(model23, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 23)
model_nums.append(23)
scores.append(score23)
pred_scores.append(pred_score23)
rmse_vals.append(rmse23)
pred_rmse_vals.append(pred_rmse23)
mae_vals.append(mae23)
pred_mae_vals.append(pred_mae23)
# Model 24: LSTM(50) -> wide Dense(100) -> Dense(1).
model24 = Sequential()
model24.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2])))
model24.add(Dense(100))
model24.add(Dense(1))
model24.summary()
model24.compile(loss='mae', optimizer='adam')
batch_model24 = 10
model24_history = model24.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model24,
                              validation_data=(test_x_np, test_y_np))
model24.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model24.h5')
model24_hist_dict = model24_history.history
model24_hist_json = json.dumps(model24_hist_dict)
# 'with' closes the file even on error (the bare open/close pair did not).
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model24_hist.json', 'w') as f:
    f.write(model24_hist_json)
score24, pred_score24 = evaluate_model(model24, model24_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model24, 24)
rmse24, mae24, pred_rmse24, pred_mae24 = create_and_save_pred_data(model24, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 24)
model_nums.append(24)
scores.append(score24)
pred_scores.append(pred_score24)
rmse_vals.append(rmse24)
pred_rmse_vals.append(pred_rmse24)
mae_vals.append(mae24)
pred_mae_vals.append(pred_mae24)
# Model 25: model 24 plus Dropout(0.3) between the hidden Dense and the output.
model25 = Sequential()
model25.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2])))
model25.add(Dense(100))
model25.add(Dropout(0.3))
model25.add(Dense(1))
model25.summary()
model25.compile(loss='mae', optimizer='adam')
batch_model25 = 10
model25_history = model25.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model25,
                              validation_data=(test_x_np, test_y_np))
model25.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model25.h5')
model25_hist_dict = model25_history.history
model25_hist_json = json.dumps(model25_hist_dict)
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model25_hist.json', 'w') as f:
    f.write(model25_hist_json)
score25, pred_score25 = evaluate_model(model25, model25_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model25, 25)
rmse25, mae25, pred_rmse25, pred_mae25 = create_and_save_pred_data(model25, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 25)
model_nums.append(25)
scores.append(score25)
pred_scores.append(pred_score25)
rmse_vals.append(rmse25)
pred_rmse_vals.append(pred_rmse25)
mae_vals.append(mae25)
pred_mae_vals.append(pred_mae25)
# Model 26: LSTM(50) -> narrowing LSTM(25) -> Dense(1).
model26 = Sequential()
model26.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), return_sequences=True))
model26.add(LSTM(25))
model26.add(Dense(1))
model26.summary()
model26.compile(loss='mae', optimizer='adam')
batch_model26 = 10
model26_history = model26.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model26,
                              validation_data=(test_x_np, test_y_np))
model26.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model26.h5')
model26_hist_dict = model26_history.history
model26_hist_json = json.dumps(model26_hist_dict)
# 'with' closes the file even on error (the bare open/close pair did not).
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model26_hist.json', 'w') as f:
    f.write(model26_hist_json)
score26, pred_score26 = evaluate_model(model26, model26_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model26, 26)
rmse26, mae26, pred_rmse26, pred_mae26 = create_and_save_pred_data(model26, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 26)
model_nums.append(26)
scores.append(score26)
pred_scores.append(pred_score26)
rmse_vals.append(rmse26)
pred_rmse_vals.append(pred_rmse26)
mae_vals.append(mae26)
pred_mae_vals.append(pred_mae26)
# Model 27: LSTM(50) -> widening LSTM(150) -> Dense(1).
model27 = Sequential()
model27.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), return_sequences=True))
model27.add(LSTM(150))
model27.add(Dense(1))
model27.summary()
model27.compile(loss='mae', optimizer='adam')
batch_model27 = 10
model27_history = model27.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model27,
                              validation_data=(test_x_np, test_y_np))
model27.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model27.h5')
model27_hist_dict = model27_history.history
model27_hist_json = json.dumps(model27_hist_dict)
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model27_hist.json', 'w') as f:
    f.write(model27_hist_json)
score27, pred_score27 = evaluate_model(model27, model27_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model27, 27)
rmse27, mae27, pred_rmse27, pred_mae27 = create_and_save_pred_data(model27, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 27)
model_nums.append(27)
scores.append(score27)
pred_scores.append(pred_score27)
rmse_vals.append(rmse27)
pred_rmse_vals.append(pred_rmse27)
mae_vals.append(mae27)
pred_mae_vals.append(pred_mae27)
# Model 28: three stacked LSTM(50) layers -> Dense(1).
model28 = Sequential()
model28.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), return_sequences=True))
model28.add(LSTM(50, return_sequences=True))
model28.add(LSTM(50))
model28.add(Dense(1))
model28.summary()
model28.compile(loss='mae', optimizer='adam')
batch_model28 = 10
model28_history = model28.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model28,
                              validation_data=(test_x_np, test_y_np))
model28.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model28.h5')
model28_hist_dict = model28_history.history
model28_hist_json = json.dumps(model28_hist_dict)
# 'with' closes the file even on error (the bare open/close pair did not).
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model28_hist.json', 'w') as f:
    f.write(model28_hist_json)
score28, pred_score28 = evaluate_model(model28, model28_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model28, 28)
rmse28, mae28, pred_rmse28, pred_mae28 = create_and_save_pred_data(model28, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 28)
model_nums.append(28)
scores.append(score28)
pred_scores.append(pred_score28)
rmse_vals.append(rmse28)
pred_rmse_vals.append(pred_rmse28)
mae_vals.append(mae28)
pred_mae_vals.append(pred_mae28)
# Model 29: baseline LSTM(50) -> Dense(1), but trained with SGD instead of Adam.
model29 = Sequential()
model29.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2])))
model29.add(Dense(1))
model29.summary()
model29.compile(loss='mae', optimizer='sgd')
batch_model29 = 10
model29_history = model29.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model29,
                              validation_data=(test_x_np, test_y_np))
model29.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model29.h5')
model29_hist_dict = model29_history.history
model29_hist_json = json.dumps(model29_hist_dict)
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model29_hist.json', 'w') as f:
    f.write(model29_hist_json)
score29, pred_score29 = evaluate_model(model29, model29_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model29, 29)
rmse29, mae29, pred_rmse29, pred_mae29 = create_and_save_pred_data(model29, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 29)
model_nums.append(29)
scores.append(score29)
pred_scores.append(pred_score29)
rmse_vals.append(rmse29)
pred_rmse_vals.append(pred_rmse29)
mae_vals.append(mae29)
pred_mae_vals.append(pred_mae29)
# Model 30: baseline LSTM(50) -> Dense(1) trained 5x longer (100 epochs).
model30 = Sequential()
model30.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2])))
model30.add(Dense(1))
model30.summary()
model30.compile(loss='mae', optimizer='adam')
batch_model30 = 10
model30_history = model30.fit(train_x_np, train_y_np, epochs=100, batch_size=batch_model30,
                              validation_data=(test_x_np, test_y_np))
model30.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model30.h5')
model30_hist_dict = model30_history.history
model30_hist_json = json.dumps(model30_hist_dict)
# 'with' closes the file even on error (the bare open/close pair did not).
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model30_hist.json', 'w') as f:
    f.write(model30_hist_json)
score30, pred_score30 = evaluate_model(model30, model30_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model30, 30)
rmse30, mae30, pred_rmse30, pred_mae30 = create_and_save_pred_data(model30, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 30)
model_nums.append(30)
scores.append(score30)
pred_scores.append(pred_score30)
rmse_vals.append(rmse30)
pred_rmse_vals.append(pred_rmse30)
mae_vals.append(mae30)
pred_mae_vals.append(pred_mae30)
# Model 31: three stacked LSTM(50) layers -> Dense(50) -> Dense(1).
model31 = Sequential()
model31.add(LSTM(50, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), return_sequences=True))
model31.add(LSTM(50, return_sequences=True))
model31.add(LSTM(50, return_sequences=False))
model31.add(Dense(50))
model31.add(Dense(1))
model31.summary()
model31.compile(loss='mae', optimizer='adam')
batch_model31 = 10
model31_history = model31.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model31,
                              validation_data=(test_x_np, test_y_np))
model31.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model31.h5')
model31_hist_dict = model31_history.history
model31_hist_json = json.dumps(model31_hist_dict)
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model31_hist.json', 'w') as f:
    f.write(model31_hist_json)
score31, pred_score31 = evaluate_model(model31, model31_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model31, 31)
rmse31, mae31, pred_rmse31, pred_mae31 = create_and_save_pred_data(model31, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 31)
model_nums.append(31)
scores.append(score31)
pred_scores.append(pred_score31)
rmse_vals.append(rmse31)
pred_rmse_vals.append(pred_rmse31)
mae_vals.append(mae31)
pred_mae_vals.append(pred_mae31)
# Model 32: three stacked narrow LSTM(20) layers -> Dense(1).
model32 = Sequential()
model32.add(LSTM(20, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), return_sequences=True))
model32.add(LSTM(20, return_sequences=True))
model32.add(LSTM(20))
model32.add(Dense(1))
model32.summary()
model32.compile(loss='mae', optimizer='adam')
batch_model32 = 10
model32_history = model32.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model32,
                              validation_data=(test_x_np, test_y_np))
model32.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model32.h5')
model32_hist_dict = model32_history.history
model32_hist_json = json.dumps(model32_hist_dict)
# 'with' closes the file even on error (the bare open/close pair did not).
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model32_hist.json', 'w') as f:
    f.write(model32_hist_json)
score32, pred_score32 = evaluate_model(model32, model32_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model32, 32)
rmse32, mae32, pred_rmse32, pred_mae32 = create_and_save_pred_data(model32, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 32)
model_nums.append(32)
scores.append(score32)
pred_scores.append(pred_score32)
rmse_vals.append(rmse32)
pred_rmse_vals.append(pred_rmse32)
mae_vals.append(mae32)
pred_mae_vals.append(pred_mae32)
# Model 33: same as model 32 plus a Dense(50) hidden layer before the output.
model33 = Sequential()
model33.add(LSTM(20, input_shape=(train_x_np.shape[1], train_x_np.shape[2]), return_sequences=True))
model33.add(LSTM(20, return_sequences=True))
model33.add(LSTM(20, return_sequences=False))
model33.add(Dense(50))
model33.add(Dense(1))
model33.summary()
model33.compile(loss='mae', optimizer='adam')
batch_model33 = 10
model33_history = model33.fit(train_x_np, train_y_np, epochs=20, batch_size=batch_model33,
                              validation_data=(test_x_np, test_y_np))
model33.save('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model33.h5')
model33_hist_dict = model33_history.history
model33_hist_json = json.dumps(model33_hist_dict)
with open('C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\model33_hist.json', 'w') as f:
    f.write(model33_hist_json)
score33, pred_score33 = evaluate_model(model33, model33_hist_dict, test_x_np, test_y_np, pred_test_x_np, pred_test_y_np,
                                       batch_model33, 33)
rmse33, mae33, pred_rmse33, pred_mae33 = create_and_save_pred_data(model33, test_x_np, test_norm_df, test_y_np,
                                                                   pred_test_x_np, pred_test_norm_df, pred_test_y_np, 33)
model_nums.append(33)
scores.append(score33)
pred_scores.append(pred_score33)
rmse_vals.append(rmse33)
pred_rmse_vals.append(pred_rmse33)
mae_vals.append(mae33)
pred_mae_vals.append(pred_mae33)
# "bo" is for "blue dot"
plt.plot(model_nums, scores, 'ro', label = 'Test Loss')
# b is for "solid blue line"
plot_title = 'Test and Kaggle Test Normalized Data Losses'
plt.plot(model_nums, pred_scores, 'bo', label = 'Kaggle Test Loss')
plt.title(plot_title)
plt.xlabel('Model Number')
plt.ylabel('Loss')
plt.legend()
plt.show()
# "bo" is for "blue dot"
plt.plot(model_nums, rmse_vals, 'ro', label = 'Test RMSE')
# b is for "solid blue line"
plot_title = 'Test and Kaggle Test Inverse Transformed Data RMSE'
plt.plot(model_nums, pred_rmse_vals, 'bo', label = 'Kaggle Test RMSE')
plt.title(plot_title)
plt.xlabel('Model Number')
plt.ylabel('RMSE')
plt.legend()
plt.show()
# "bo" is for "blue dot"
plt.plot(model_nums, mae_vals, 'ro', label = 'Test MAE')
# b is for "solid blue line"
plot_title = 'Test and Kaggle Test Inverse Transformed Data MAE'
plt.plot(model_nums, pred_mae_vals, 'bo', label = 'Kaggle Test MAE')
plt.title(plot_title)
plt.xlabel('Model Number')
plt.ylabel('MAE')
plt.legend()
plt.show()
# Assemble all per-model metrics into one comparison table.
results_df = pd.DataFrame({
    'model_nums': model_nums,
    'scores': scores,
    'pred_scores': pred_scores,
    'rmse_vals': rmse_vals,
    'pred_rmse_vals': pred_rmse_vals,
    'mae_vals': mae_vals,
    'pred_mae_vals': pred_mae_vals,
})
# Use label-based column selection: the original positional Series indexing
# (min_vals[6]) is deprecated/removed in recent pandas and silently breaks
# if columns are ever reordered.
pred_mae_min = results_df['pred_mae_vals'].min()
# Report the winning model's NUMBER from the model_nums column — the original
# printed the row index (idxmin result), which only matches the model number
# by coincidence of how rows were appended.
model_min = results_df.loc[results_df['pred_mae_vals'].idxmin(), 'model_nums']
print('Minimum MAE: %.6f' % pred_mae_min)
print('Model with Minimum MAE: %d' % model_min)
results_df
res_out_file = 'C:\\DePaulCoursework\\Fall 2018 CSC 578\\Assignments\\Final Project\\Outputs\\All_Model_Results.csv'
results_df.to_csv(res_out_file, header = True, index = False)